We use it to avoid VMExits on FS_BASE and GS_BASE MSR accesses.
Signed-off-by: Weidong Han <weidong.han@intel.com>
Signed-off-by: Xin Li <xin.b.li@intel.com>
Signed-off-by: Keir Fraser <keir@xensource.com>
/* I/O permission bitmap is globally shared by all HVM guests. */
char __attribute__ ((__section__ (".bss.page_aligned")))
hvm_io_bitmap[3*PAGE_SIZE];
+/* MSR permission bitmap is globally shared by all HVM guests. */
+char __attribute__ ((__section__ (".bss.page_aligned")))
+ hvm_msr_bitmap[PAGE_SIZE];
void hvm_enable(struct hvm_function_table *fns)
{
+    /*
+     * Intercept accesses to all I/O ports by default.  Bit 0x80 is then
+     * cleared so accesses to port 0x80 (conventionally the POST/diagnostic
+     * port, often written for short delays — confirm) do not cause VMExits.
+     */
    memset(hvm_io_bitmap, ~0, sizeof(hvm_io_bitmap));
    clear_bit(0x80, hvm_io_bitmap);

+    /* All MSR accesses are intercepted by default. */
+    memset(hvm_msr_bitmap, ~0, sizeof(hvm_msr_bitmap));
+
+    /* Register the vendor-specific (VMX/SVM) function table and mark HVM on. */
    hvm_funcs = *fns;
    hvm_enabled = 1;
}
return ctl;
}
+/*
+ * Clear the read and write intercept bits for @msr in the global MSR
+ * permission bitmap, so guest RDMSR/WRMSR on it no longer cause VMExits.
+ * MSRs outside the two controllable ranges are silently ignored (they
+ * remain intercepted).
+ */
+static void disable_intercept_for_msr(u32 msr)
+{
+    /*
+     * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address).
+     * The 4KB bitmap is laid out as four 1KB regions:
+     *   0x000 read-low, 0x400 read-high, 0x800 write-low, 0xc00 write-high.
+     * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
+     */
+    if ( msr <= 0x1fff )
+    {
+        __clear_bit(msr, hvm_msr_bitmap + 0x000); /* read-low */
+        __clear_bit(msr, hvm_msr_bitmap + 0x800); /* write-low */
+    }
+    else if ( (msr >= 0xc0000000) && (msr <= 0xc0001fff) )
+    {
+        msr &= 0x1fff;
+        __clear_bit(msr, hvm_msr_bitmap + 0x400); /* read-high */
+        __clear_bit(msr, hvm_msr_bitmap + 0xc00); /* write-high */
+    }
+}
+
void vmx_init_vmcs_config(void)
{
u32 vmx_msr_low, vmx_msr_high, min, max;
#ifdef __x86_64__
min = max |= CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING;
#endif
+ max |= CPU_BASED_ACTIVATE_MSR_BITMAP;
_vmx_cpu_based_exec_control = adjust_vmx_controls(
min, max, MSR_IA32_VMX_PROCBASED_CTLS_MSR);
vmx_cpu_based_exec_control = _vmx_cpu_based_exec_control;
vmx_vmexit_control = _vmx_vmexit_control;
vmx_vmentry_control = _vmx_vmentry_control;
+
+ disable_intercept_for_msr(MSR_FS_BASE);
+ disable_intercept_for_msr(MSR_GS_BASE);
}
else
{
__vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmx_cpu_based_exec_control);
v->arch.hvm_vcpu.u.vmx.exec_control = vmx_cpu_based_exec_control;
+ if ( vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_MSR_BITMAP )
+ __vmwrite(MSR_BITMAP, virt_to_maddr(hvm_msr_bitmap));
+
/* I/O access bitmap. */
__vmwrite(IO_BITMAP_A, virt_to_maddr(hvm_io_bitmap));
__vmwrite(IO_BITMAP_B, virt_to_maddr(hvm_io_bitmap + PAGE_SIZE));
/* End of save/restore */
extern char hvm_io_bitmap[];
+extern char hvm_msr_bitmap[];
extern int hvm_enabled;
void hvm_enable(struct hvm_function_table *);
#define CPU_BASED_MOV_DR_EXITING 0x00800000
#define CPU_BASED_UNCOND_IO_EXITING 0x01000000
#define CPU_BASED_ACTIVATE_IO_BITMAP 0x02000000
+#define CPU_BASED_ACTIVATE_MSR_BITMAP 0x10000000
#define CPU_BASED_MONITOR_EXITING 0x20000000
#define CPU_BASED_PAUSE_EXITING 0x40000000
IO_BITMAP_A_HIGH = 0x00002001,
IO_BITMAP_B = 0x00002002,
IO_BITMAP_B_HIGH = 0x00002003,
+ MSR_BITMAP = 0x00002004,
+ MSR_BITMAP_HIGH = 0x00002005,
VM_EXIT_MSR_STORE_ADDR = 0x00002006,
VM_EXIT_MSR_STORE_ADDR_HIGH = 0x00002007,
VM_EXIT_MSR_LOAD_ADDR = 0x00002008,
#define MSR_LSTAR 0xc0000082 /* long mode SYSCALL target */
#define MSR_CSTAR 0xc0000083 /* compatibility mode SYSCALL target */
#define MSR_SYSCALL_MASK 0xc0000084 /* EFLAGS mask for syscall */
-#define MSR_FS_BASE 0xc0000100 /* 64bit GS base */
-#define MSR_GS_BASE 0xc0000101 /* 64bit FS base */
+#define MSR_FS_BASE 0xc0000100 /* 64bit FS base */
+#define MSR_GS_BASE 0xc0000101 /* 64bit GS base */
#define MSR_SHADOW_GS_BASE 0xc0000102 /* SwapGS GS shadow */
/* EFER bits: */
#define _EFER_SCE 0 /* SYSCALL/SYSRET */